--- /dev/null
+/*
+ * Originally from linux/drivers/char/mem.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Added devfs support.
+ * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
+ * Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
+ */
+/*
+ * taken from
+ * linux/drivers/char/mem.c and linux-2.6-xen-sparse/drivers/xen/char/mem.c.
+ * adjusted for IA64 and made transparent.
+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
+ * VA Linux Systems Japan K.K.
+ */
+
+#include <linux/config.h>
+#include <linux/mm.h>
+#include <linux/efi.h>
+
+/*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+ *
+ */
+/*
+ * Return nonzero when the physical address @addr must be mapped uncached,
+ * i.e. when the EFI memory map does not grant write-back (WB) cacheability
+ * for it.  @file is unused on ia64 (O_SYNC is deliberately ignored, see
+ * the comment in the body).
+ */
+static inline int uncached_access(struct file *file, unsigned long addr)
+{
+ /*
+ * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
+ */
+ return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
+}
+
+/*
+ * mmap handler for /dev/mem (hooked up via ARCH_HAS_DEV_MEM_MMAP_MEM).
+ * Maps the physical range starting at (vma->vm_pgoff << PAGE_SHIFT),
+ * of length vm_end - vm_start, into the caller's address space.
+ *
+ * Returns 0 on success, the negative value from HYPERVISOR_ioremap()
+ * on hypervisor failure, or -EAGAIN if remap_pfn_range() fails.
+ */
+int xen_mmap_mem(struct file * file, struct vm_area_struct * vma)
+{
+ unsigned long addr = vma->vm_pgoff << PAGE_SHIFT;
+ size_t size = vma->vm_end - vma->vm_start;
+
+
+#if 0
+ /*
+ *XXX FIXME: linux-2.6.16.29, linux-2.6.17
+ * valid_mmap_phys_addr_range() in linux/arch/ia64/kernel/efi.c
+ * fails checks.
+ * linux-2.6.18.1's returns always 1.
+ * Its comments says
+ *
+ * MMIO regions are often missing from the EFI memory map.
+ * We must allow mmap of them for programs like X, so we
+ * currently can't do any useful validation.
+ */
+ if (!valid_mmap_phys_addr_range(addr, &size))
+ return -EINVAL;
+ if (size < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+#endif
+
+ /*
+ * Under Xen, ask the hypervisor to establish the machine mapping for
+ * this physical range first; only the error case of the returned
+ * value is used here, and it is propagated to the caller.
+ */
+ if (is_running_on_xen()) {
+ unsigned long offset = HYPERVISOR_ioremap(addr, size);
+ if (IS_ERR_VALUE(offset))
+ return offset;
+ }
+
+ /* Note: the argument below is the same value as 'addr' computed above. */
+ if (uncached_access(file, vma->vm_pgoff << PAGE_SHIFT))
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
+ if (remap_pfn_range(vma,
+ vma->vm_start,
+ vma->vm_pgoff,
+ size,
+ vma->vm_page_prot))
+ return -EAGAIN;
+ return 0;
+}
(((bvec_to_bus((vec1)) + (vec1)->bv_len) == bvec_to_bus((vec2))) && \
((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
bvec_to_pseudophys((vec2))))
+
+/* We will be supplying our own /dev/mem implementation */
+#define ARCH_HAS_DEV_MEM
+#define ARCH_HAS_DEV_MEM_MMAP_MEM
+int xen_mmap_mem(struct file * file, struct vm_area_struct * vma);
#endif /* CONFIG_XEN */
# endif /* KERNEL */
ioremap (unsigned long offset, unsigned long size)
{
offset = HYPERVISOR_ioremap(offset, size);
+ if (IS_ERR_VALUE(offset))
+ return (void __iomem*)offset;
return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset));
}
}
#define ARCH_HAS_TRANSLATE_MEM_PTR 1
+#ifndef CONFIG_XEN
static __inline__ char *
xlate_dev_mem_ptr (unsigned long p)
{
return ptr;
}
+#else
+/*
+ * Xen variant: translate a /dev/mem physical address @p into a kernel
+ * virtual pointer valid for an access of @sz bytes.  Valid, cached RAM
+ * pages get a direct __va() mapping; anything else (invalid pfn or a
+ * PageUncached page) is mapped through ioremap() and must later be
+ * released with xlate_dev_mem_ptr_unmap().
+ *
+ * NOTE(review): ioremap() yields void __iomem * which is returned here
+ * as char * — presumably intentional for the /dev/mem read/write path;
+ * confirm callers treat the result accordingly.
+ */
+static __inline__ char *
+xlate_dev_mem_ptr (unsigned long p, ssize_t sz)
+{
+ unsigned long pfn = p >> PAGE_SHIFT;
+
+ if (pfn_valid(pfn) && !PageUncached(pfn_to_page(pfn)))
+ return __va(p);
+
+ return ioremap(p, sz);
+}
+
+/*
+ * Release a pointer obtained from xlate_dev_mem_ptr().  Pointers in the
+ * ia64 uncached region came from ioremap() and need iounmap(); cached
+ * __va() pointers require no teardown and are left alone.
+ */
+static __inline__ void
+xlate_dev_mem_ptr_unmap (char* v)
+{
+ if (REGION_NUMBER(v) == RGN_UNCACHED)
+ iounmap(v);
+}
+#endif
/*
* Convert a virtual cached kernel memory pointer to an uncached pointer